From c5f20f6be82879b386a3a00e782d4834ad0d4c69 Mon Sep 17 00:00:00 2001 From: "iap10@labyrinth.cl.cam.ac.uk" Date: Wed, 2 Feb 2005 13:01:09 +0000 Subject: [PATCH] bitkeeper revision 1.1159.238.4 (4200cf15b8T6NKAN_1Kqx3nMy4I7WA) slab.c in Linux is not a very nice piece of code: the version in Xen has been hacked a certain amount and is not a vision of beauty either. Given how rare and non-time-critical dynamic allocations are in Xen, this replaces the 1800-line slab.c with a 160-line malloc.c which is written as simply as possible for future enhancement. Signed-off-by: Rusty Russell (authored) Signed-off-by: ian.pratt@cl.cam.ac.uk --- .rootkeys | 1 + xen/arch/x86/setup.c | 4 - xen/common/malloc.c | 164 +++++++++++++++++++++++++++++++++++++++ xen/common/page_alloc.c | 10 +-- xen/include/xen/domain.h | 2 - xen/include/xen/lib.h | 12 +++ xen/include/xen/list.h | 12 +++ xen/include/xen/slab.h | 39 +--------- 8 files changed, 194 insertions(+), 50 deletions(-) create mode 100644 xen/common/malloc.c diff --git a/.rootkeys b/.rootkeys index a00f425304..b9056c0cd3 100644 --- a/.rootkeys +++ b/.rootkeys @@ -923,6 +923,7 @@ 3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c 3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c 3ddb79bduhSEZI8xa7IbGQCpap5y2A xen/common/lib.c +4200cf14XGr26_PCC8NxREDhr7Hk5Q xen/common/malloc.c 41a61536SZbR6cj1ukWTb0DYU-vz9w xen/common/multicall.c 3ddb79bdD4SLmmdMD7yLW5HcUWucXw xen/common/page_alloc.c 3e54c38dkHAev597bPr71-hGzTdocg xen/common/perfc.c diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c index f38fc62ff4..6d029bed32 100644 --- a/xen/arch/x86/setup.c +++ b/xen/arch/x86/setup.c @@ -598,10 +598,6 @@ void __init __start_xen(multiboot_info_t *mbi) early_boot = 0; - /* Initialise the slab allocator. 
*/ - xmem_cache_init(); - xmem_cache_sizes_init(max_page); - start_of_day(); grant_table_init(); diff --git a/xen/common/malloc.c b/xen/common/malloc.c new file mode 100644 index 0000000000..9d2750a0d5 --- /dev/null +++ b/xen/common/malloc.c @@ -0,0 +1,164 @@ +/* Simple allocator for Xen. If larger than a page, simply use the + * page-order allocator. + * + * Copyright (C) 2005 Rusty Russell IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include <xen/mm.h> +#include <xen/spinlock.h> +#include <xen/slab.h> + +#define BUG_ON(x) do { if (x) BUG(); }while(0) + +static LIST_HEAD(freelist); +static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED; + +struct xmalloc_hdr +{ + /* Total including this hdr: negative means allocated. */ + long size; + union { + struct list_head freelist; + char data[0]; + } u; +}; + +static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block) +{ + size_t leftover = block - size; + + /* If enough left to make a block, put it on free list. 
*/ + if (leftover >= sizeof(struct xmalloc_hdr)) { + struct xmalloc_hdr *extra; + + extra = (void *)hdr + size; + extra->size = leftover; + list_add(&extra->u.freelist, &freelist); + } else + size = block; + + hdr->size = -size; +} + +static void *xmalloc_new_page(size_t size) +{ + struct xmalloc_hdr *hdr; + unsigned long flags; + + hdr = (void *)alloc_xenheap_pages(0); + if (!hdr) + return NULL; + + spin_lock_irqsave(&freelist_lock, flags); + maybe_split(hdr, size, PAGE_SIZE); + spin_unlock_irqrestore(&freelist_lock, flags); + return hdr->u.data; +} + +/* Big object? Just use page allocator. */ +static void *xmalloc_whole_pages(size_t size) +{ + struct xmalloc_hdr *hdr; + unsigned int pageorder = get_order(size); + + hdr = (void *)alloc_xenheap_pages(pageorder); + if (!hdr) + return NULL; + + hdr->size = -(1 << (pageorder + PAGE_SHIFT)); + return hdr->u.data; +} + +void *__xmalloc(size_t size, const char *file, unsigned int line) +{ + struct xmalloc_hdr *i; + unsigned long flags; + + /* Add room for header, align to unsigned long. */ + size += offsetof(struct xmalloc_hdr, u.data); + size = ((size + sizeof(unsigned long)-1)&~(sizeof(unsigned long)-1)); + + /* Minimum size is size of freelist entry. */ + if (size < sizeof(*i)) + size = sizeof(*i); + + /* For big allocs, give them whole pages. */ + if (size >= PAGE_SIZE) + return xmalloc_whole_pages(size); + + /* Search free list */ + spin_lock_irqsave(&freelist_lock, flags); + list_for_each_entry(i, &freelist, u.freelist) { + if (i->size >= size) { + list_del(&i->u.freelist); + maybe_split(i, size, i->size); + spin_unlock_irqrestore(&freelist_lock, flags); + return i->u.data; + } + } + spin_unlock_irqrestore(&freelist_lock, flags); + + /* Alloc a new page and return from that. 
*/ + return xmalloc_new_page(size); +} + +void __xfree(const void *p, const char *file, unsigned int line) +{ + unsigned long flags; + struct xmalloc_hdr *i, *tmp, *hdr; + + if (!p) + return; + + hdr = container_of((void *)p, struct xmalloc_hdr, u.data); + + /* We know hdr will be on same page. */ + BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK)); + + /* Not previously freed. */ + BUG_ON(hdr->size > 0); + hdr->size = -hdr->size; + + /* Big allocs free directly. */ + if (hdr->size >= PAGE_SIZE) { + free_xenheap_pages((unsigned long)hdr, get_order(hdr->size)); + return; + } + + /* Merge with other free block, or put in list. */ + spin_lock_irqsave(&freelist_lock, flags); + list_for_each_entry_safe(i, tmp, &freelist, u.freelist) { + /* We follow this block? Swallow it. */ + if ((void *)i + i->size == (void *)hdr) { + list_del(&i->u.freelist); + i->size += hdr->size; + hdr = i; + } + /* It follows us? Delete it and add it to us. */ + if ((void *)hdr + hdr->size == (void *)i) { + list_del(&i->u.freelist); + hdr->size += i->size; + } + } + + /* Did we free entire page? 
*/ + if (hdr->size == PAGE_SIZE) { + BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0); + free_xenheap_pages((unsigned long)hdr, 0); + } else + list_add(&hdr->u.freelist, &freelist); + spin_unlock_irqrestore(&freelist_lock, flags); +} diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index eae1692eb1..1471170079 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -403,9 +403,8 @@ unsigned long alloc_xenheap_pages(unsigned int order) { unsigned long flags; struct pfn_info *pg; - int i, attempts = 0; + int i; - retry: local_irq_save(flags); pg = alloc_heap_pages(MEMZONE_XEN, order); local_irq_restore(flags); @@ -425,14 +424,7 @@ unsigned long alloc_xenheap_pages(unsigned int order) return (unsigned long)page_to_virt(pg); no_memory: - if ( attempts++ < 8 ) - { - xmem_cache_reap(); - goto retry; - } - printk("Cannot handle page request order %d!\n", order); - dump_slabinfo(); return 0; } diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h index c2d0dbc144..d502f18871 100644 --- a/xen/include/xen/domain.h +++ b/xen/include/xen/domain.h @@ -6,8 +6,6 @@ * Arch-specifics. */ -extern void domain_startofday(void); - extern struct domain *arch_alloc_domain_struct(void); extern void arch_free_domain_struct(struct domain *d); diff --git a/xen/include/xen/lib.h b/xen/include/xen/lib.h index 6ce764b7fc..c6f45580d8 100644 --- a/xen/include/xen/lib.h +++ b/xen/include/xen/lib.h @@ -20,6 +20,18 @@ struct domain; void cmdline_parse(char *cmdline); +/** + * container_of - cast a member of a structure out to the containing structure + * + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. 
+ * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + #define printk printf void printf(const char *format, ...); void panic(const char *format, ...); diff --git a/xen/include/xen/list.h b/xen/include/xen/list.h index 7b19bb4650..93d9f987e7 100644 --- a/xen/include/xen/list.h +++ b/xen/include/xen/list.h @@ -174,5 +174,17 @@ static __inline__ void list_splice(struct list_head *list, struct list_head *hea pos = list_entry(pos->member.next, typeof(*pos), member), \ prefetch(pos->member.next)) +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop counter. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) #endif /* _LINUX_LIST_H */ diff --git a/xen/include/xen/slab.h b/xen/include/xen/slab.h index 25406617dc..8fed6444ff 100644 --- a/xen/include/xen/slab.h +++ b/xen/include/xen/slab.h @@ -13,45 +13,14 @@ #include #else - -typedef struct xmem_cache_s xmem_cache_t; - #include #include #include -/* Flags to pass to xmem_cache_create(). */ -/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */ -#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor */ -#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */ -#define SLAB_POISON 0x00000800UL /* Poison objects */ -#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */ -#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align obj on a cache line */ - -/* Flags passed to a constructor function. 
*/ -#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */ -#define SLAB_CTOR_ATOMIC 0x002UL /* tell cons. it can't sleep */ -#define SLAB_CTOR_VERIFY 0x004UL /* tell cons. it's a verify call */ - -extern void xmem_cache_init(void); -extern void xmem_cache_sizes_init(unsigned long); - -extern xmem_cache_t *xmem_find_general_cachep(size_t); -extern xmem_cache_t *xmem_cache_create( - const char *, size_t, size_t, unsigned long, - void (*)(void *, xmem_cache_t *, unsigned long), - void (*)(void *, xmem_cache_t *, unsigned long)); -extern int xmem_cache_destroy(xmem_cache_t *); -extern int xmem_cache_shrink(xmem_cache_t *); -extern void *xmem_cache_alloc(xmem_cache_t *); -extern void xmem_cache_free(xmem_cache_t *, void *); - -extern void *_xmalloc(size_t); -extern void xfree(const void *); - -extern int xmem_cache_reap(void); - -extern void dump_slabinfo(); +#define _xmalloc(size) __xmalloc(size, __FILE__, __LINE__) +#define xfree(ptr) __xfree(ptr, __FILE__, __LINE__) +extern void *__xmalloc(size_t size, const char *file, unsigned int line); +extern void __xfree(const void *p, const char *file, unsigned int line); /* Nicely typesafe for you. */ #define xmalloc(type) ((type *)_xmalloc(sizeof(type))) -- 2.30.2